Dependencies

In [1]:
!pip3 install keras tensorflow-gpu cudatoolkit cudnn
!pip3 install matplotlib
!pip3 install numpy
!pip3 install pandas
!pip3 install scikit-learn
!pip3 install seaborn
!pip3 install 'tensorflow[and-cuda]'
Requirement already satisfied: keras in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (3.8.0)
Collecting tensorflow-gpu
  Using cached tensorflow-gpu-2.12.0.tar.gz (2.6 kB)
  Preparing metadata (setup.py) ... error
  error: subprocess-exited-with-error
  
  × python setup.py egg_info did not run successfully.
  │ exit code: 1
  ╰─> [39 lines of output]
      Traceback (most recent call last):
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/requirements.py", line 36, in __init__
          parsed = _parse_requirement(requirement_string)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 62, in parse_requirement
          return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 80, in _parse_requirement
          url, specifier, marker = _parse_requirement_details(tokenizer)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 124, in _parse_requirement_details
          marker = _parse_requirement_marker(
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 145, in _parse_requirement_marker
          tokenizer.raise_syntax_error(
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_tokenizer.py", line 167, in raise_syntax_error
          raise ParserSyntaxError(
      packaging._tokenizer.ParserSyntaxError: Expected end or semicolon (after name and no valid version specifier)
          python_version>"3.7"
                        ^
      
      The above exception was the direct cause of the following exception:
      
      Traceback (most recent call last):
        File "<string>", line 2, in <module>
        File "<pip-setuptools-caller>", line 34, in <module>
        File "/tmp/pip-install-qihvbstl/tensorflow-gpu_b8dd19a571e24d91b3952fa47b1e1527/setup.py", line 40, in <module>
          setuptools.setup()
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/__init__.py", line 116, in setup
          _install_setup_requires(attrs)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/__init__.py", line 87, in _install_setup_requires
          dist.parse_config_files(ignore_option_errors=True)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/dist.py", line 610, in parse_config_files
          self._finalize_requires()
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/dist.py", line 344, in _finalize_requires
          self._normalize_requires()
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/dist.py", line 359, in _normalize_requires
          self.install_requires = list(map(str, _reqs.parse(install_requires)))
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/requirements.py", line 38, in __init__
          raise InvalidRequirement(str(e)) from e
      packaging.requirements.InvalidRequirement: Expected end or semicolon (after name and no valid version specifier)
          python_version>"3.7"
                        ^
      [end of output]
  
  note: This error originates from a subprocess, and is likely not a problem with pip.
Collecting tensorflow-gpu
  Using cached tensorflow-gpu-2.12.0.tar.gz (2.6 kB)
  Preparing metadata (setup.py) ... error
  error: subprocess-exited-with-error
  
  × python setup.py egg_info did not run successfully.
  │ exit code: 1
  ╰─> [39 lines of output]
      Traceback (most recent call last):
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/requirements.py", line 36, in __init__
          parsed = _parse_requirement(requirement_string)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 62, in parse_requirement
          return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 80, in _parse_requirement
          url, specifier, marker = _parse_requirement_details(tokenizer)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 124, in _parse_requirement_details
          marker = _parse_requirement_marker(
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_parser.py", line 145, in _parse_requirement_marker
          tokenizer.raise_syntax_error(
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/_tokenizer.py", line 167, in raise_syntax_error
          raise ParserSyntaxError(
      packaging._tokenizer.ParserSyntaxError: Expected end or semicolon (after name and no valid version specifier)
          python_version>"3.7"
                        ^
      
      The above exception was the direct cause of the following exception:
      
      Traceback (most recent call last):
        File "<string>", line 2, in <module>
        File "<pip-setuptools-caller>", line 34, in <module>
        File "/tmp/pip-install-qihvbstl/tensorflow-gpu_b8dd19a571e24d91b3952fa47b1e1527/setup.py", line 40, in <module>
          setuptools.setup()
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/__init__.py", line 116, in setup
          _install_setup_requires(attrs)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/__init__.py", line 87, in _install_setup_requires
          dist.parse_config_files(ignore_option_errors=True)
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/dist.py", line 610, in parse_config_files
          self._finalize_requires()
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/dist.py", line 344, in _finalize_requires
          self._normalize_requires()
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/setuptools/dist.py", line 359, in _normalize_requires
          self.install_requires = list(map(str, _reqs.parse(install_requires)))
        File "/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/packaging/requirements.py", line 38, in __init__
          raise InvalidRequirement(str(e)) from e
      packaging.requirements.InvalidRequirement: Expected end or semicolon (after name and no valid version specifier)
          python_version>"3.7"
                        ^
      [end of output]
  
  note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed

× Encountered error while generating package metadata.
╰─> See above for output.

note: This is an issue with the package mentioned above, not pip.
hint: See above for details.
Requirement already satisfied: matplotlib in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (3.10.0)
Requirement already satisfied: contourpy>=1.0.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (1.3.1)
Requirement already satisfied: cycler>=0.10 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (0.12.1)
Requirement already satisfied: fonttools>=4.22.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (4.55.3)
Requirement already satisfied: kiwisolver>=1.3.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (1.4.8)
Requirement already satisfied: numpy>=1.23 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (2.0.2)
Requirement already satisfied: packaging>=20.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (24.2)
Requirement already satisfied: pillow>=8 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (11.1.0)
Requirement already satisfied: pyparsing>=2.3.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (3.2.1)
Requirement already satisfied: python-dateutil>=2.7 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib) (2.9.0.post0)
Requirement already satisfied: six>=1.5 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from python-dateutil>=2.7->matplotlib) (1.17.0)
Requirement already satisfied: numpy in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (2.0.2)
Requirement already satisfied: pandas in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (2.2.3)
Requirement already satisfied: numpy>=1.22.4 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from pandas) (2.0.2)
Requirement already satisfied: python-dateutil>=2.8.2 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from pandas) (2.9.0.post0)
Requirement already satisfied: pytz>=2020.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from pandas) (2024.2)
Requirement already satisfied: tzdata>=2022.7 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from pandas) (2024.2)
Requirement already satisfied: six>=1.5 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from python-dateutil>=2.8.2->pandas) (1.17.0)
Requirement already satisfied: scikit-learn in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (1.6.1)
Requirement already satisfied: numpy>=1.19.5 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from scikit-learn) (2.0.2)
Requirement already satisfied: scipy>=1.6.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from scikit-learn) (1.15.1)
Requirement already satisfied: joblib>=1.2.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from scikit-learn) (1.4.2)
Requirement already satisfied: threadpoolctl>=3.1.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from scikit-learn) (3.5.0)
Requirement already satisfied: seaborn in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (0.13.2)
Requirement already satisfied: numpy!=1.24.0,>=1.20 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from seaborn) (2.0.2)
Requirement already satisfied: pandas>=1.2 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from seaborn) (2.2.3)
Requirement already satisfied: matplotlib!=3.6.1,>=3.4 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from seaborn) (3.10.0)
Requirement already satisfied: contourpy>=1.0.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.3.1)
Requirement already satisfied: cycler>=0.10 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (0.12.1)
Requirement already satisfied: fonttools>=4.22.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (4.55.3)
Requirement already satisfied: kiwisolver>=1.3.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.4.8)
Requirement already satisfied: packaging>=20.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (24.2)
Requirement already satisfied: pillow>=8 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (11.1.0)
Requirement already satisfied: pyparsing>=2.3.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (3.2.1)
Requirement already satisfied: python-dateutil>=2.7 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (2.9.0.post0)
Requirement already satisfied: pytz>=2020.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from pandas>=1.2->seaborn) (2024.2)
Requirement already satisfied: tzdata>=2022.7 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from pandas>=1.2->seaborn) (2024.2)
Requirement already satisfied: six>=1.5 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from python-dateutil>=2.7->matplotlib!=3.6.1,>=3.4->seaborn) (1.17.0)
Requirement already satisfied: tensorflow[and-cuda] in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (2.18.0)
Requirement already satisfied: absl-py>=1.0.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (2.1.0)
Requirement already satisfied: astunparse>=1.6.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (1.6.3)
Requirement already satisfied: flatbuffers>=24.3.25 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (24.12.23)
Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (0.6.0)
Requirement already satisfied: google-pasta>=0.1.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (0.2.0)
Requirement already satisfied: libclang>=13.0.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (18.1.1)
Requirement already satisfied: opt-einsum>=2.3.2 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (3.4.0)
Requirement already satisfied: packaging in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (24.2)
Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev,>=3.20.3 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (5.29.3)
Requirement already satisfied: requests<3,>=2.21.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (2.32.3)
Requirement already satisfied: setuptools in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (75.1.0)
Requirement already satisfied: six>=1.12.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (1.17.0)
Requirement already satisfied: termcolor>=1.1.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (2.5.0)
Requirement already satisfied: typing-extensions>=3.6.6 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (4.12.2)
Requirement already satisfied: wrapt>=1.11.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (1.17.1)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (1.69.0)
Requirement already satisfied: tensorboard<2.19,>=2.18 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (2.18.0)
Requirement already satisfied: keras>=3.5.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (3.8.0)
Requirement already satisfied: numpy<2.1.0,>=1.26.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (2.0.2)
Requirement already satisfied: h5py>=3.11.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (3.12.1)
Requirement already satisfied: ml-dtypes<0.5.0,>=0.4.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (0.4.1)
Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (0.37.1)
Requirement already satisfied: nvidia-cublas-cu12==12.5.3.2 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (12.5.3.2)
Requirement already satisfied: nvidia-cuda-cupti-cu12==12.5.82 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (12.5.82)
Requirement already satisfied: nvidia-cuda-nvcc-cu12==12.5.82 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (12.5.82)
Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.5.82 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (12.5.82)
Requirement already satisfied: nvidia-cuda-runtime-cu12==12.5.82 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (12.5.82)
Requirement already satisfied: nvidia-cudnn-cu12==9.3.0.75 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (9.3.0.75)
Requirement already satisfied: nvidia-cufft-cu12==11.2.3.61 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (11.2.3.61)
Requirement already satisfied: nvidia-curand-cu12==10.3.6.82 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (10.3.6.82)
Requirement already satisfied: nvidia-cusolver-cu12==11.6.3.83 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (11.6.3.83)
Requirement already satisfied: nvidia-cusparse-cu12==12.5.1.3 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (12.5.1.3)
Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (2.21.5)
Requirement already satisfied: nvidia-nvjitlink-cu12==12.5.82 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorflow[and-cuda]) (12.5.82)
Requirement already satisfied: wheel<1.0,>=0.23.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from astunparse>=1.6.0->tensorflow[and-cuda]) (0.44.0)
Requirement already satisfied: rich in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from keras>=3.5.0->tensorflow[and-cuda]) (13.9.4)
Requirement already satisfied: namex in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from keras>=3.5.0->tensorflow[and-cuda]) (0.0.8)
Requirement already satisfied: optree in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from keras>=3.5.0->tensorflow[and-cuda]) (0.13.1)
Requirement already satisfied: charset_normalizer<4,>=2 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorflow[and-cuda]) (3.4.1)
Requirement already satisfied: idna<4,>=2.5 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorflow[and-cuda]) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorflow[and-cuda]) (2.3.0)
Requirement already satisfied: certifi>=2017.4.17 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from requests<3,>=2.21.0->tensorflow[and-cuda]) (2024.12.14)
Requirement already satisfied: markdown>=2.6.8 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorboard<2.19,>=2.18->tensorflow[and-cuda]) (3.7)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorboard<2.19,>=2.18->tensorflow[and-cuda]) (0.7.2)
Requirement already satisfied: werkzeug>=1.0.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from tensorboard<2.19,>=2.18->tensorflow[and-cuda]) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.1.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from werkzeug>=1.0.1->tensorboard<2.19,>=2.18->tensorflow[and-cuda]) (3.0.2)
Requirement already satisfied: markdown-it-py>=2.2.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from rich->keras>=3.5.0->tensorflow[and-cuda]) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from rich->keras>=3.5.0->tensorflow[and-cuda]) (2.19.1)
Requirement already satisfied: mdurl~=0.1 in /home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages (from markdown-it-py>=2.2.0->rich->keras>=3.5.0->tensorflow[and-cuda]) (0.1.2)

Imports

In [2]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import torch
import os
from sklearn.metrics import confusion_matrix, classification_report

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16, ResNet50, DenseNet121, Xception
2025-01-12 21:30:07.584001: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
E0000 00:00:1736710207.618228   26656 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
E0000 00:00:1736710207.630076   26656 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2025-01-12 21:30:07.696427: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.

Init GPU

In [3]:
def setup_gpu():

    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print(f"Number of TensorFlow GPUs: {len(gpus)}")
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return device
device = setup_gpu()
Number of TensorFlow GPUs: 1

Data preprocessing

In [4]:
def create_data_generators(data_dir='chest_xray', img_size=(224, 224), batch_size=32):

    train_dataset = tf.keras.preprocessing.image_dataset_from_directory(
        f'{data_dir}/train',
        seed=123,
        image_size=img_size,
        batch_size=batch_size,
        label_mode='binary'
    )
    
    val_dataset = tf.keras.preprocessing.image_dataset_from_directory(
        f'{data_dir}/val',
        seed=123,
        image_size=img_size,
        batch_size=batch_size,
        label_mode='binary'
    )
    
    test_dataset = tf.keras.preprocessing.image_dataset_from_directory(
        f'{data_dir}/test',
        seed=123,
        image_size=img_size,
        batch_size=batch_size,
        label_mode='binary'
    )
    
    data_augmentation = tf.keras.Sequential([
        tf.keras.layers.RandomRotation(0.2),
        tf.keras.layers.RandomZoom(0.2),
        tf.keras.layers.RandomFlip("horizontal"),
        tf.keras.layers.RandomContrast(0.2)
    ])
    
    normalization_layer = tf.keras.layers.Rescaling(1./255)
    
    # Apply augmentation and normalization to training data
    train_dataset = train_dataset.map(
        lambda x, y: (data_augmentation(x), y)
    ).map(
        lambda x, y: (normalization_layer(x), y)
    )
    
    # Apply only normalization to validation and test data
    val_dataset = val_dataset.map(
        lambda x, y: (normalization_layer(x), y)
    )
    test_dataset = test_dataset.map(
        lambda x, y: (normalization_layer(x), y)
    )
    
    # Configure datasets for performance
    AUTOTUNE = tf.data.AUTOTUNE
    train_dataset = train_dataset.cache().prefetch(buffer_size=AUTOTUNE)
    val_dataset = val_dataset.cache().prefetch(buffer_size=AUTOTUNE)
    test_dataset = test_dataset.cache().prefetch(buffer_size=AUTOTUNE)
    
    return train_dataset, val_dataset, test_dataset

Data vizualization

In [5]:
directory = "chest_xray/train"

filepath =[]
label = []

folds = os.listdir(directory)

for fold in folds:
    f_path = os.path.join(directory , fold)
    
    imgs = os.listdir(f_path)
    
    for img in imgs:
        
        img_path = os.path.join(f_path , img)
        filepath.append(img_path)
        label.append(fold)
        
#Concat data paths with labels
file_path_series = pd.Series(filepath , name= 'filepath')
Label_path_series = pd.Series(label , name = 'label')
df_train = pd.concat([file_path_series ,Label_path_series ] , axis = 1)
In [6]:
count = df_train['label'].value_counts()

fig, axs = plt.subplots(1, 2, figsize=(12, 6), facecolor='white')

palette = sns.color_palette("viridis")
sns.set_palette(palette)
axs[0].pie(count, labels=count.index, autopct='%1.1f%%', startangle=140)
axs[0].set_title('Distribution of Categories')

sns.barplot(x=count.index, y=count.values, ax=axs[1], palette="viridis")
axs[1].set_title('Count of Categories')

plt.tight_layout()

plt.show()
/tmp/ipykernel_26656/3026648010.py:10: FutureWarning: 

Passing `palette` without assigning `hue` is deprecated and will be removed in v0.14.0. Assign the `x` variable to `hue` and set `legend=False` for the same effect.

  sns.barplot(x=count.index, y=count.values, ax=axs[1], palette="viridis")
No description has been provided for this image
In [7]:
directory = "chest_xray/test"

filepath =[]
label = []

folds = os.listdir(directory)

for fold in folds:
    f_path = os.path.join(directory , fold)
    
    imgs = os.listdir(f_path)
    
    for img in imgs:
        
        img_path = os.path.join(f_path , img)
        filepath.append(img_path)
        label.append(fold)
        
#Concat data paths with labels
file_path_series = pd.Series(filepath , name= 'filepath')
Label_path_series = pd.Series(label , name = 'label')
df_test = pd.concat([file_path_series ,Label_path_series ] , axis = 1)
In [8]:
count = df_test['label'].value_counts()

fig, axs = plt.subplots(1, 2, figsize=(12, 6), facecolor='white')

palette = sns.color_palette("viridis")
sns.set_palette(palette)
axs[0].pie(count, labels=count.index, autopct='%1.1f%%', startangle=140)
axs[0].set_title('Distribution of Categories')

sns.barplot(x=count.index, y=count.values, ax=axs[1], palette="viridis")
axs[1].set_title('Count of Categories')

plt.tight_layout()

plt.show()
/tmp/ipykernel_26656/2818601119.py:10: FutureWarning: 

Passing `palette` without assigning `hue` is deprecated and will be removed in v0.14.0. Assign the `x` variable to `hue` and set `legend=False` for the same effect.

  sns.barplot(x=count.index, y=count.values, ax=axs[1], palette="viridis")
No description has been provided for this image
In [9]:
def visualize_images(path, num_images=5):
    
    image_filenames = os.listdir(path)
    
    num_images = min(num_images, len(image_filenames))
    
    fig, axes = plt.subplots(1, num_images, figsize=(15, 3),facecolor='white')
    
    for i, image_filename in enumerate(image_filenames[:num_images]):
        image_path = os.path.join(path, image_filename)
        image = mpimg.imread(image_path)
        
        axes[i].imshow(image)
        axes[i].axis('off')
        axes[i].set_title(image_filename)
    
    plt.tight_layout()
    plt.show()
In [10]:
path_to_visualize = "chest_xray/train/NORMAL"
visualize_images(path_to_visualize, num_images=5)
No description has been provided for this image
In [11]:
path_to_visualize = "chest_xray/train/PNEUMONIA"
visualize_images(path_to_visualize, num_images=5)
No description has been provided for this image

Model Architectures

In [12]:
def create_xception_model():
    base_model = Xception(weights='imagenet', include_top=False, pooling='avg', input_shape=(224, 224, 3))
    base_model.trainable = False
    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dropout(0.45),
        tf.keras.layers.Dense(220, activation='relu'),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Dense(60,activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
    return model
In [13]:
def create_custom_cnn():
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(224, 224, 3)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(64, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(128, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    return model

def create_transfer_learning_model(base_model_name='VGG16'):

    if base_model_name == 'VGG16':
        base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    elif base_model_name == 'ResNet50':
        base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    elif base_model_name == 'DenseNet121':
        base_model = DenseNet121(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
        for layer in base_model.layers[-20:]:
            layer.trainable = True
    elif base_model_name == 'CNN':
        base_model = create_custom_cnn()
        return base_model
    elif base_model_name == 'Xception':
        base_model = create_xception_model()
        return base_model
    if base_model_name == 'VGG16' or base_model_name == 'ResNet50' or base_model_name == 'CNN':
        model = tf.keras.Sequential([
            base_model,
            tf.keras.layers.GlobalAveragePooling2D(),
            tf.keras.layers.Dense(256, activation='relu'),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(1, activation='sigmoid')
        ])
    elif base_model_name == 'DenseNet121':
        model = tf.keras.Sequential([
            base_model,
            tf.keras.layers.GlobalAveragePooling2D(),
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dense(256, activation='relu'),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(1, activation='sigmoid')
        ])
    return model

Training

In [14]:
def load_saved_model(model_name):
    """Load a saved model"""
    try:
        return tf.keras.models.load_model(f'best_{model_name}.keras')
    except (OSError, IOError) as e:
        print("Model file not found, continuining with training...")
    return None

def evaluate_model(model, test_dataset):
    """Evaluate a model on test data"""
    test_loss, test_accuracy = model.evaluate(test_dataset)
    print(f"\nTest accuracy: {test_accuracy:.4f}")
    print(f"Test loss: {test_loss:.4f}")
    
    return test_loss, test_accuracy
In [15]:
def predict_image(image_path):
    # Load and preprocess the image
    img = image.load_img(image_path, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array = img_array / 255.0  # Normalize pixel values
    
    # Make prediction
    prediction = model.predict(img_array)
    
    # For binary classification
    result = "Pneumonia" if prediction[0] > 0.5 else "Normal"
    confidence = prediction[0] if prediction[0] > 0.5 else 1 - prediction[0]
    
    return result, confidence
In [16]:
def train_and_evaluate(model, model_name, continue_training=False):

    model_path = f'best_{model_name}.h5'

    if not continue_training and os.path.exists(model_path):
        print(f"Loading existing model: {model_path}")
        model = tf.keras.models.load_model(model_path)
    else:
        print("Creating new model...")
        model = create_transfer_learning_model(model_name)

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
        loss='binary_crossentropy',
        metrics=['accuracy', tf.keras.metrics.Recall(), tf.keras.metrics.F1Score()]
    )


    callbacks = [
        tf.keras.callbacks.EarlyStopping(
        patience=5,  # Increased patience
        restore_best_weights=True
        ),
        tf.keras.callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.2,
            patience=3,
            min_lr=0.00001
        ),
        tf.keras.callbacks.ModelCheckpoint(
            f'best_{model_name}.keras',
            save_best_only=True,
            monitor='val_f1_score'
        )
    ]

    history = model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=20,
        callbacks=callbacks
    )
    print(history)
    return history

Model Comparison

In [17]:
def plot_results(results):
    best_epoch = results['val_accuracy'].index(max(results['val_accuracy'])) + 1

    plt.style.use('seaborn-v0_8-darkgrid')

    fig, axs = plt.subplots(1, 2, figsize=(16, 5))

    # Plot training and validation accuracy
    axs[0].plot(results['accuracy'], label='Training Accuracy', color='blue')
    axs[0].plot(results['val_accuracy'], label='Validation Accuracy', color='red')
    axs[0].scatter(best_epoch - 1, results['val_accuracy'][best_epoch - 1], color='green', label=f'Best Epoch: {best_epoch}')
    axs[0].set_xlabel('Epoch')
    axs[0].set_ylabel('Accuracy')
    axs[0].set_title('Training and Validation Accuracy')
    axs[0].legend()

    # Plot training and validation loss
    axs[1].plot(results['loss'], label='Training Loss', color='blue')
    axs[1].plot(results['val_loss'], label='Validation Loss', color='red')
    axs[1].scatter(best_epoch - 1, results['val_loss'][best_epoch - 1], color='green',label=f'Best Epoch: {best_epoch}')
    axs[1].set_xlabel('Epoch')
    axs[1].set_ylabel('Loss')
    axs[1].set_title('Training and Validation Loss')
    axs[1].legend()

    plt.tight_layout()
    plt.show()
In [18]:
import shutil
import os

def create_balanced_test_set(source_dir, dest_dir, n_samples=20):
    os.makedirs(os.path.join(dest_dir, 'normal'), exist_ok=True)
    os.makedirs(os.path.join(dest_dir, 'pneumonia'), exist_ok=True)
    
    for class_name in ['NORMAL', 'PNEUMONIA']:
        source_files = os.listdir(os.path.join(source_dir, class_name))[:n_samples]
        for file_name in source_files:
            shutil.copy(
                os.path.join(source_dir, class_name, file_name),
                os.path.join(dest_dir, class_name, file_name)
            )
create_balanced_test_set('chest_xray/test/','chest_xray/balanced-test/')

test_dir = 'chest_xray/balanced-test/'
test_ds = tf.keras.utils.image_dataset_from_directory(
    test_dir, 
    seed=123,
    image_size=(224, 224),
    batch_size=40,
    shuffle=True,
    validation_split=None,
    class_names=['normal', 'pneumonia'])
class_labels = ['normal', 'pneumonia']
def plot_images_with_predictions(model, dataset, class_labels, num_images=40):
    images = []
    labels = []
    
    for img, lbl in dataset.take(1):
        images = img.numpy()
        labels = lbl.numpy()
    
    predictions = model.predict(dataset)
    predictions = (predictions > 0.5).astype(int)
    
    plt.figure(figsize=(15, 10))
    for i in range(min(num_images, len(images))):
        plt.subplot(8, 5, i + 1)
        plt.imshow(images[i].astype("uint8"))
        true_class = class_labels[int(labels[i])]
        pred_class = class_labels[int(predictions[i])]
        plt.title(f'True: {true_class}\nPred: {pred_class}')
        plt.axis('off')
    
    plt.tight_layout()
    plt.show()


cnn_best = load_saved_model('VGG16')
plot_images_with_predictions(cnn_best, test_ds, class_labels)
cnn_best = load_saved_model('CNN')
plot_images_with_predictions(cnn_best, test_ds, class_labels)
cnn_best = load_saved_model('ResNet50')
plot_images_with_predictions(cnn_best, test_ds, class_labels)
cnn_best = load_saved_model('DenseNet121')
plot_images_with_predictions(cnn_best, test_ds, class_labels)
cnn_best = load_saved_model('Xception')
plot_images_with_predictions(cnn_best, test_ds, class_labels)
Found 40 files belonging to 2 classes.
I0000 00:00:1736710213.709621   26656 gpu_device.cc:2022] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 5558 MB memory:  -> device: 0, name: NVIDIA GeForce RTX 3070 Ti, pci bus id: 0000:0a:00.0, compute capability: 8.6
2025-01-12 21:30:16.114241: I tensorflow/core/framework/local_rendezvous.cc:405] Local rendezvous is aborting with status: OUT_OF_RANGE: End of sequence
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1736710216.372285   26867 service.cc:148] XLA service 0x7f02ac005640 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
I0000 00:00:1736710216.372396   26867 service.cc:156]   StreamExecutor device (0): NVIDIA GeForce RTX 3070 Ti, Compute Capability 8.6
2025-01-12 21:30:16.388443: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:268] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.
I0000 00:00:1736710216.438307   26867 cuda_dnn.cc:529] Loaded cuDNN version 90300
2025-01-12 21:30:17.219380: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_184', 8 bytes spill stores, 8 bytes spill loads

2025-01-12 21:30:19.160820: W external/local_xla/xla/tsl/framework/bfc_allocator.cc:378] Garbage collection: deallocate free memory regions (i.e., allocations) so that we can re-allocate a larger region to avoid OOM due to memory fragmentation. If you see this message frequently, you are running near the threshold of the available device memory and re-allocation may incur great performance overhead. You may try smaller batch sizes to observe the performance impact. Set TF_ENABLE_GPU_GARBAGE_COLLECTION=false if you'd like to disable this feature.
1/1 ━━━━━━━━━━━━━━━━━━━━ 11s 11s/step
I0000 00:00:1736710227.037441   26867 device_compiler.h:188] Compiled cluster using XLA!  This line is logged at most once for the lifetime of the process.
/tmp/ipykernel_26656/2425999544.py:43: DeprecationWarning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. Ensure you extract a single element from your array before performing this operation. (Deprecated NumPy 1.25.)
  pred_class = class_labels[int(predictions[i])]
No description has been provided for this image
2025-01-12 21:30:29.017880: I tensorflow/core/framework/local_rendezvous.cc:405] Local rendezvous is aborting with status: OUT_OF_RANGE: End of sequence
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 1s/step
No description has been provided for this image
2025-01-12 21:30:35.611837: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1363_0', 8 bytes spill stores, 8 bytes spill loads

2025-01-12 21:30:35.915599: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1363_0', 8 bytes spill stores, 8 bytes spill loads

2025-01-12 21:30:35.916461: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1363', 8 bytes spill stores, 8 bytes spill loads

2025-01-12 21:30:35.939345: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1363', 224 bytes spill stores, 224 bytes spill loads

1/1 ━━━━━━━━━━━━━━━━━━━━ 5s 5s/step
No description has been provided for this image
2025-01-12 21:30:43.457685: I tensorflow/core/framework/local_rendezvous.cc:405] Local rendezvous is aborting with status: OUT_OF_RANGE: End of sequence
2025-01-12 21:30:46.815744: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3275_0', 8 bytes spill stores, 8 bytes spill loads

2025-01-12 21:30:46.881381: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3275', 8 bytes spill stores, 8 bytes spill loads

2025-01-12 21:30:46.932952: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3275', 220 bytes spill stores, 220 bytes spill loads

2025-01-12 21:30:47.057932: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3275_0', 1128 bytes spill stores, 1376 bytes spill loads

1/1 ━━━━━━━━━━━━━━━━━━━━ 18s 18s/step
No description has been provided for this image
2025-01-12 21:31:05.031646: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1039', 232 bytes spill stores, 232 bytes spill loads

WARNING:tensorflow:5 out of the last 5 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x7f030417ecb0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
1/1 ━━━━━━━━━━━━━━━━━━━━ 10s 10s/step
No description has been provided for this image
In [19]:
from tensorflow.keras import mixed_precision

def compare_models():
    models = {
        'CNN': create_custom_cnn(),
        'VGG16': create_transfer_learning_model('VGG16'),
        'ResNet50': create_transfer_learning_model('ResNet50'),
        'DenseNet121': create_transfer_learning_model('DenseNet121'),
        'Xception': create_xception_model()
    }
    
    results = {}
    for name, model in models.items():
        print(f"Training {name}...")
        
        if name == "DenseNet121":
            policy = mixed_precision.Policy('mixed_float16')
            mixed_precision.set_global_policy(policy)

        history = train_and_evaluate(model, name)

        if history.history != None:
            results = history.history
            plot_results(results)
            results = None
        
        tf.keras.backend.clear_session()
        with torch.no_grad():
            torch.cuda.empty_cache()
    

Comparison

In [20]:
def plot_performance_comparison(metrics):
    # Bar plot for Recall and F1 Score
    plt.figure(figsize=(12, 6))
    
    models = list(metrics.keys())
    recalls = [m['recall'] for m in metrics.values()]
    f1_scores = [m['f1'] for m in metrics.values()]
    
    x = np.arange(len(models))
    width = 0.35
    
    plt.bar(x - width/2, recalls, width, label='Recall')
    plt.bar(x + width/2, f1_scores, width, label='F1 Score')
    
    plt.xlabel('Models')
    plt.ylabel('Score')
    plt.title('Model Performance Comparison')
    plt.xticks(x, models, rotation=45)
    plt.legend()
    plt.tight_layout()
    plt.show()
In [21]:
def plot_confusion_matrices(metrics):
    fig, axes = plt.subplots(2, 2, figsize=(15, 15))
    fig.suptitle('Confusion Matrices for Different Models')
    
    for (name, metric), ax in zip(metrics.items(), axes.flat):
        sns.heatmap(metric['confusion_matrix'], 
                   annot=True, 
                   fmt='d',
                   cmap='Blues',
                   ax=ax)
        ax.set_title(name)
        ax.set_xlabel('Predicted')
        ax.set_ylabel('True')
    
    plt.tight_layout()
    plt.show()

def create_performance_table(metrics):
    data = {
        'Model': [],
        'Recall': [],
        'F1 Score': []
    }
    
    for name, metric in metrics.items():
        data['Model'].append(name)
        data['Recall'].append(f"{metric['recall']:.4f}")
        data['F1 Score'].append(f"{metric['f1']:.4f}")
    
    return pd.DataFrame(data)
In [22]:
from sklearn.metrics import confusion_matrix, recall_score, f1_score

def load_and_evaluate_models(test_ds):
    metrics = {}
    
    model_names = ['ResNet50', 'VGG16', 'DenseNet121', 'CNN', 'Xception']
    
    for name in model_names:
        model = load_saved_model(name)
        
        y_pred = model.predict(test_ds)
        y_pred = (y_pred > 0.5).astype(int)
        
        y_true = np.concatenate([y for x, y in test_ds], axis=0)
        
        metrics[name] = {
            'recall': recall_score(y_true, y_pred),
            'f1': f1_score(y_true, y_pred),
            'confusion_matrix': confusion_matrix(y_true, y_pred)
        }

    return metrics

Main Program

In [23]:
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    print("Running code on GPU:", gpus)
else:
    print("Running code on CPU:", tf.config.list_physical_devices('CPU'))

train_ds, val_ds, test_ds = create_data_generators()

compare_models()
metrics = load_and_evaluate_models(test_ds)

plot_performance_comparison(metrics)
plot_confusion_matrices(metrics)

performance_table = create_performance_table(metrics)
print("\nModel Performance Metrics:")
print(performance_table)
Running code on GPU: [PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
Found 5216 files belonging to 2 classes.
Found 16 files belonging to 2 classes.
Found 624 files belonging to 2 classes.
/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Training CNN...
Creating new model...
Epoch 1/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 32s 158ms/step - accuracy: 0.7375 - f1_score: 0.8573 - loss: 0.5698 - recall: 0.9756 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 0.7030 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 2/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 5s 28ms/step - accuracy: 0.8164 - f1_score: 0.8573 - loss: 0.3745 - recall: 0.9436 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 0.6498 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 3/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 5s 29ms/step - accuracy: 0.8627 - f1_score: 0.8573 - loss: 0.3111 - recall: 0.9316 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 0.8646 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 4/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 5s 29ms/step - accuracy: 0.8808 - f1_score: 0.8573 - loss: 0.2772 - recall: 0.9400 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 0.8230 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 5/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 5s 28ms/step - accuracy: 0.8838 - f1_score: 0.8573 - loss: 0.2646 - recall: 0.9399 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 0.6859 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 6/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 5s 28ms/step - accuracy: 0.9039 - f1_score: 0.8573 - loss: 0.2316 - recall: 0.9434 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 0.8506 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 7/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 5s 28ms/step - accuracy: 0.9127 - f1_score: 0.8573 - loss: 0.2246 - recall: 0.9479 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 0.8522 - val_recall: 1.0000 - learning_rate: 2.0000e-05
<keras.src.callbacks.history.History object at 0x7f02bd7c9090>
No description has been provided for this image
Training VGG16...
Creating new model...
Epoch 1/20
2025-01-12 21:32:37.402679: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1904', 24 bytes spill stores, 24 bytes spill loads

2025-01-12 21:32:37.660685: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1904', 60 bytes spill stores, 64 bytes spill loads

2025-01-12 21:32:37.673275: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_1904', 68 bytes spill stores, 72 bytes spill loads

163/163 ━━━━━━━━━━━━━━━━━━━━ 74s 249ms/step - accuracy: 0.7618 - f1_score: 0.8573 - loss: 0.4656 - recall: 0.9202 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 0.9677 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 2/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 33s 203ms/step - accuracy: 0.8558 - f1_score: 0.8573 - loss: 0.3184 - recall: 0.9004 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 1.1764 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 3/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 33s 201ms/step - accuracy: 0.8967 - f1_score: 0.8573 - loss: 0.2426 - recall: 0.9217 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 0.6561 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 4/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 34s 207ms/step - accuracy: 0.9227 - f1_score: 0.8573 - loss: 0.1958 - recall: 0.9370 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 0.7213 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 5/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 35s 213ms/step - accuracy: 0.9520 - f1_score: 0.8573 - loss: 0.1321 - recall: 0.9653 - val_accuracy: 0.9375 - val_f1_score: 0.6667 - val_loss: 0.1469 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 6/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 36s 221ms/step - accuracy: 0.9592 - f1_score: 0.8573 - loss: 0.1057 - recall: 0.9685 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 0.9897 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 7/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 33s 204ms/step - accuracy: 0.9656 - f1_score: 0.8573 - loss: 0.0914 - recall: 0.9743 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 1.0060 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 8/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 34s 209ms/step - accuracy: 0.9684 - f1_score: 0.8573 - loss: 0.0855 - recall: 0.9751 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 2.3329 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 9/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 33s 205ms/step - accuracy: 0.9808 - f1_score: 0.8573 - loss: 0.0487 - recall: 0.9853 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 1.6298 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 10/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 32s 196ms/step - accuracy: 0.9937 - f1_score: 0.8573 - loss: 0.0223 - recall: 0.9945 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 1.8443 - val_recall: 1.0000 - learning_rate: 2.0000e-05
<keras.src.callbacks.history.History object at 0x7f02f41c8d00>
No description has been provided for this image
Training ResNet50...
Creating new model...
Epoch 1/20
2025-01-12 21:39:13.388993: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_16371_0', 112 bytes spill stores, 224 bytes spill loads

2025-01-12 21:39:13.682197: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_20857', 60 bytes spill stores, 64 bytes spill loads

2025-01-12 21:39:13.689638: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_16371', 220 bytes spill stores, 576 bytes spill loads

2025-01-12 21:39:13.849425: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_20857', 68 bytes spill stores, 72 bytes spill loads

2025-01-12 21:39:13.954775: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_20857', 24 bytes spill stores, 24 bytes spill loads

163/163 ━━━━━━━━━━━━━━━━━━━━ 66s 171ms/step - accuracy: 0.8877 - f1_score: 0.8573 - loss: 0.2518 - recall: 0.9256 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 10.5123 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 2/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 129ms/step - accuracy: 0.9837 - f1_score: 0.8575 - loss: 0.0500 - recall: 0.9886 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 17.7056 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 3/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 0.9904 - f1_score: 0.8604 - loss: 0.0232 - recall: 0.9940 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 9.3674 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 4/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 0.9865 - f1_score: 0.8640 - loss: 0.0478 - recall: 0.9933 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 6.7796 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 5/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 0.9947 - f1_score: 0.8703 - loss: 0.0133 - recall: 0.9963 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 6.7759 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 6/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 129ms/step - accuracy: 0.9945 - f1_score: 0.8677 - loss: 0.0188 - recall: 0.9962 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 6.9019 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 7/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 0.9987 - f1_score: 0.8790 - loss: 0.0040 - recall: 0.9999 - val_accuracy: 0.5625 - val_f1_score: 0.6667 - val_loss: 5.6062 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 8/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 0.9970 - f1_score: 0.8815 - loss: 0.0124 - recall: 0.9977 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 5.4165 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 9/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 0.9935 - f1_score: 0.8724 - loss: 0.0162 - recall: 0.9952 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 2.0194 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 10/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 131ms/step - accuracy: 0.9969 - f1_score: 0.8766 - loss: 0.0106 - recall: 0.9979 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 4.4262 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 11/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 41s 133ms/step - accuracy: 0.9977 - f1_score: 0.8750 - loss: 0.0091 - recall: 0.9984 - val_accuracy: 0.5625 - val_f1_score: 0.6667 - val_loss: 8.3043 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 12/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 0.9980 - f1_score: 0.8720 - loss: 0.0050 - recall: 0.9988 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 9.1842 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 13/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 131ms/step - accuracy: 0.9991 - f1_score: 0.8772 - loss: 0.0023 - recall: 0.9996 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 2.4378 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 14/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 1.0000 - f1_score: 0.8820 - loss: 2.3260e-04 - recall: 1.0000 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 1.9426 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 15/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 129ms/step - accuracy: 1.0000 - f1_score: 0.8836 - loss: 1.9429e-04 - recall: 1.0000 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 1.8731 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 16/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 129ms/step - accuracy: 1.0000 - f1_score: 0.8848 - loss: 1.6620e-04 - recall: 1.0000 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 1.7878 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 17/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 130ms/step - accuracy: 1.0000 - f1_score: 0.8857 - loss: 1.1319e-04 - recall: 1.0000 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 1.7652 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 18/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 129ms/step - accuracy: 1.0000 - f1_score: 0.8868 - loss: 1.1475e-04 - recall: 1.0000 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 1.7948 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 19/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 128ms/step - accuracy: 1.0000 - f1_score: 0.8845 - loss: 9.3633e-05 - recall: 1.0000 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 1.7746 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 20/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 21s 129ms/step - accuracy: 1.0000 - f1_score: 0.8896 - loss: 7.0306e-05 - recall: 1.0000 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 1.7637 - val_recall: 1.0000 - learning_rate: 2.0000e-05
<keras.src.callbacks.history.History object at 0x7f0304110940>
No description has been provided for this image
Training DenseNet121...
Creating new model...
Epoch 1/20
2025-01-12 21:49:06.988904: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_10', 44 bytes spill stores, 40 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_12', 68 bytes spill stores, 64 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_13', 68 bytes spill stores, 64 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_14', 52 bytes spill stores, 44 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_15', 16 bytes spill stores, 16 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_16', 60 bytes spill stores, 52 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_17', 64 bytes spill stores, 60 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_18', 68 bytes spill stores, 68 bytes spill loads
ptxas warning : Registers are spilled to local memory in function 'input_reduce_select_fusion_19', 32 bytes spill stores, 40 bytes spill loads

163/163 ━━━━━━━━━━━━━━━━━━━━ 0s 138ms/step - accuracy: 0.7267 - f1_score: 0.8619 - loss: 0.6306 - recall: 0.6903
2025-01-12 21:49:33.726889: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3707', 12 bytes spill stores, 12 bytes spill loads

2025-01-12 21:49:33.777198: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3720', 12 bytes spill stores, 12 bytes spill loads

2025-01-12 21:49:33.910769: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3707', 12 bytes spill stores, 12 bytes spill loads

163/163 ━━━━━━━━━━━━━━━━━━━━ 163s 232ms/step - accuracy: 0.7274 - f1_score: 0.8619 - loss: 0.6293 - recall: 0.6910 - val_accuracy: 0.5625 - val_f1_score: 0.6667 - val_loss: 0.8931 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 2/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 137ms/step - accuracy: 0.9408 - f1_score: 0.9069 - loss: 0.1871 - recall: 0.9463 - val_accuracy: 0.5000 - val_f1_score: 0.6667 - val_loss: 1.4384 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 3/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 137ms/step - accuracy: 0.9751 - f1_score: 0.9250 - loss: 0.0878 - recall: 0.9809 - val_accuracy: 0.6250 - val_f1_score: 0.6667 - val_loss: 0.7839 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 4/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 136ms/step - accuracy: 0.9870 - f1_score: 0.9358 - loss: 0.0479 - recall: 0.9916 - val_accuracy: 0.6875 - val_f1_score: 0.6957 - val_loss: 0.8979 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 5/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 138ms/step - accuracy: 0.9964 - f1_score: 0.9457 - loss: 0.0293 - recall: 0.9975 - val_accuracy: 0.7500 - val_f1_score: 0.6957 - val_loss: 0.9685 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 6/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 138ms/step - accuracy: 0.9984 - f1_score: 0.9548 - loss: 0.0201 - recall: 0.9993 - val_accuracy: 0.7500 - val_f1_score: 0.6957 - val_loss: 1.0079 - val_recall: 1.0000 - learning_rate: 1.0000e-04
Epoch 7/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 23s 138ms/step - accuracy: 0.9985 - f1_score: 0.9585 - loss: 0.0163 - recall: 0.9992 - val_accuracy: 0.8125 - val_f1_score: 0.6957 - val_loss: 0.2693 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 8/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 23s 139ms/step - accuracy: 0.9996 - f1_score: 0.9617 - loss: 0.0134 - recall: 0.9995 - val_accuracy: 0.8750 - val_f1_score: 0.7619 - val_loss: 0.1768 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 9/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 137ms/step - accuracy: 0.9996 - f1_score: 0.9629 - loss: 0.0117 - recall: 0.9995 - val_accuracy: 0.8750 - val_f1_score: 0.7273 - val_loss: 0.1958 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 10/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 136ms/step - accuracy: 0.9996 - f1_score: 0.9650 - loss: 0.0114 - recall: 0.9995 - val_accuracy: 0.8125 - val_f1_score: 0.7273 - val_loss: 0.2442 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 11/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 136ms/step - accuracy: 0.9996 - f1_score: 0.9640 - loss: 0.0115 - recall: 0.9995 - val_accuracy: 0.8125 - val_f1_score: 0.7273 - val_loss: 0.2555 - val_recall: 1.0000 - learning_rate: 2.0000e-05
Epoch 12/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 137ms/step - accuracy: 0.9996 - f1_score: 0.9662 - loss: 0.0114 - recall: 0.9995 - val_accuracy: 0.8125 - val_f1_score: 0.7273 - val_loss: 0.2788 - val_recall: 1.0000 - learning_rate: 1.0000e-05
Epoch 13/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 22s 136ms/step - accuracy: 0.9996 - f1_score: 0.9658 - loss: 0.0108 - recall: 0.9995 - val_accuracy: 0.8125 - val_f1_score: 0.7273 - val_loss: 0.3017 - val_recall: 1.0000 - learning_rate: 1.0000e-05
<keras.src.callbacks.history.History object at 0x7f02bc9bb0d0>
No description has been provided for this image
Training Xception...
Creating new model...
Epoch 1/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 25s 96ms/step - accuracy: 0.5932 - f1_score: 0.8573 - loss: 0.7594 - recall: 0.5776 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 0.4467 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 2/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 58ms/step - accuracy: 0.8783 - f1_score: 0.8573 - loss: 0.2860 - recall: 0.9434 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 0.3710 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 3/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 59ms/step - accuracy: 0.8953 - f1_score: 0.8573 - loss: 0.2448 - recall: 0.9402 - val_accuracy: 0.6875 - val_f1_score: 0.6667 - val_loss: 0.3580 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 4/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 59ms/step - accuracy: 0.9105 - f1_score: 0.8573 - loss: 0.2225 - recall: 0.9528 - val_accuracy: 0.7500 - val_f1_score: 0.6667 - val_loss: 0.3420 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 5/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 59ms/step - accuracy: 0.9145 - f1_score: 0.8573 - loss: 0.2067 - recall: 0.9555 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.3415 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 6/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 59ms/step - accuracy: 0.9276 - f1_score: 0.8573 - loss: 0.1854 - recall: 0.9586 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.3249 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 7/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 58ms/step - accuracy: 0.9305 - f1_score: 0.8573 - loss: 0.1722 - recall: 0.9610 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.3276 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 8/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 57ms/step - accuracy: 0.9348 - f1_score: 0.8573 - loss: 0.1632 - recall: 0.9647 - val_accuracy: 0.8750 - val_f1_score: 0.6667 - val_loss: 0.2806 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 9/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 59ms/step - accuracy: 0.9392 - f1_score: 0.8573 - loss: 0.1497 - recall: 0.9667 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.3119 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 10/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 55ms/step - accuracy: 0.9470 - f1_score: 0.8573 - loss: 0.1397 - recall: 0.9699 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.2963 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 11/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 55ms/step - accuracy: 0.9424 - f1_score: 0.8573 - loss: 0.1358 - recall: 0.9651 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.2774 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 12/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 55ms/step - accuracy: 0.9489 - f1_score: 0.8573 - loss: 0.1262 - recall: 0.9720 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.2983 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 13/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 56ms/step - accuracy: 0.9472 - f1_score: 0.8573 - loss: 0.1303 - recall: 0.9728 - val_accuracy: 0.8750 - val_f1_score: 0.6667 - val_loss: 0.2309 - val_recall: 0.8750 - learning_rate: 1.0000e-04
Epoch 14/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 55ms/step - accuracy: 0.9536 - f1_score: 0.8573 - loss: 0.1253 - recall: 0.9743 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.2400 - val_recall: 0.7500 - learning_rate: 1.0000e-04
Epoch 15/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 10s 58ms/step - accuracy: 0.9584 - f1_score: 0.8573 - loss: 0.1148 - recall: 0.9771 - val_accuracy: 0.8750 - val_f1_score: 0.6667 - val_loss: 0.2381 - val_recall: 0.7500 - learning_rate: 1.0000e-04
Epoch 16/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 57ms/step - accuracy: 0.9543 - f1_score: 0.8573 - loss: 0.1097 - recall: 0.9713 - val_accuracy: 0.8125 - val_f1_score: 0.6667 - val_loss: 0.2412 - val_recall: 0.7500 - learning_rate: 1.0000e-04
Epoch 17/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 57ms/step - accuracy: 0.9536 - f1_score: 0.8573 - loss: 0.1060 - recall: 0.9749 - val_accuracy: 0.8750 - val_f1_score: 0.6667 - val_loss: 0.2510 - val_recall: 0.7500 - learning_rate: 2.0000e-05
Epoch 18/20
163/163 ━━━━━━━━━━━━━━━━━━━━ 9s 55ms/step - accuracy: 0.9640 - f1_score: 0.8573 - loss: 0.0978 - recall: 0.9757 - val_accuracy: 0.8750 - val_f1_score: 0.6667 - val_loss: 0.2616 - val_recall: 0.7500 - learning_rate: 2.0000e-05
<keras.src.callbacks.history.History object at 0x7f03043d0370>
No description has been provided for this image
WARNING:tensorflow:6 out of the last 6 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x7f01a05d77f0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
20/20 ━━━━━━━━━━━━━━━━━━━━ 5s 137ms/step
20/20 ━━━━━━━━━━━━━━━━━━━━ 8s 80ms/step
/home/dariu/anaconda3/envs/gpu_env/lib/python3.10/site-packages/keras/src/saving/saving_lib.py:757: UserWarning: Skipping variable loading for optimizer 'adam', because it has 746 variables whereas the saved optimizer has 750 variables. 
  saveable.load_own_variables(weights_store.get(inner_path))
2025-01-12 21:57:45.720172: I external/local_xla/xla/stream_executor/cuda/cuda_asm_compiler.cc:397] ptxas warning : Registers are spilled to local memory in function 'gemm_fusion_dot_3685', 8 bytes spill stores, 8 bytes spill loads

20/20 ━━━━━━━━━━━━━━━━━━━━ 13s 351ms/step
2025-01-12 21:57:55.377944: I tensorflow/core/framework/local_rendezvous.cc:405] Local rendezvous is aborting with status: OUT_OF_RANGE: End of sequence
20/20 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step
20/20 ━━━━━━━━━━━━━━━━━━━━ 4s 124ms/step
No description has been provided for this image
No description has been provided for this image
Model Performance Metrics:
         Model  Recall F1 Score
0     ResNet50  1.0000   0.7692
1        VGG16  0.8692   0.8571
2  DenseNet121  1.0000   0.7863
3          CNN  0.9487   0.8615
4     Xception  0.9077   0.8520